import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import csv
%matplotlib inline
# Upload the CSV label files to the Colab workspace
from google.colab import files
uploaded = files.upload()
# Mount Google Drive so the image dataset is accessible from Colab
from google.colab import drive
drive.mount('/content/gdrive')
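# The loop below expects a DataFrame named `train` with an 'id' column (image file
# names) and one column per label; 'train.csv' is an assumed name for the CSV
# uploaded above, so adjust it to match the actual file.
train = pd.read_csv('train.csv')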
# Read, resize, and normalise every training image listed in train['id']
train_image = []
for i in tqdm(range(train.shape[0])):
    img = image.load_img('/content/gdrive/My Drive/colab_data_2/'+train['id'][i], target_size=(64,64,3))
    img = image.img_to_array(img)
    img = img/255
    train_image.append(img)
X = np.array(train_image)
y = np.array(train.drop(['id', 'Emotions'],axis=1))
# Split the data into training and validation sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42, test_size=0.1)
# Create the CNN Keras model
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation="relu", input_shape=(64,64,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
# sigmoid rather than softmax: this is a multi-label task (an image can contain
# several emotions at once), which also matches the binary_crossentropy loss below
model.add(Dense(5, activation='sigmoid'))
model.summary()
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=10, validation_data=(X_test, y_test), batch_size=15)
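# Optional sanity check (not part of the original run): evaluate the trained model
# on the held-out split to report loss and accuracy outside of fit()'s progress bar.
score = model.evaluate(X_test, y_test, batch_size=15)
print("Validation loss: {:.4f}, accuracy: {:.4f}".format(score[0], score[1]))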
# Load and preprocess a test image
test_img = 'test811.png'
img = image.load_img('/content/gdrive/My Drive/colab_data_2/'+test_img,target_size=(64,64,3))
img = image.img_to_array(img)
img = img/255
# Look up how many emotions are labelled for this test image
count = 0
with open('/content/emo_count.csv','r') as emo:
    csvReader = csv.reader(emo)
    for row in csvReader:
        if row[0]==test_img:
            count = row[1]
print(count)
#Predicting the Emojis in an image
classes = np.array(train.columns[2:])
proba = model.predict(img.reshape(1,64,64,3))
top_3 = np.argsort(proba[0])[:-4:-1]
emotions = {}
print("Predicted Classification:")
for i in range(int(count)):
    # print("{}".format(classes[top_3[i]])+" ({:.3})".format(proba[0][top_3[i]]))
    print("{}".format(classes[top_3[i]]))
print()
print("Probabilities of each class:")
for p in range(len(proba[0])):
    emotions[classes[p]] = proba[0][p]
print(emotions)
plt.imshow(img)